const runtime.pageSize
62 uses
runtime (current package)
malloc.go#L120: pageSize = _PageSize
malloc.go#L124: maxObjsPerSpan = pageSize / 8
malloc.go#L264: pagesPerArena = heapArenaBytes / pageSize
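
The three malloc.go lines derive sizing constants from pageSize. A minimal sketch of that arithmetic, assuming the common linux/amd64 values (8 KiB runtime pages, 64 MiB heap arenas); the real runtime picks these per platform:

package main

import "fmt"

const (
	pageSize       = 8192     // assumed value of _PageSize on linux/amd64
	heapArenaBytes = 64 << 20 // assumed 64 MiB heap arena

	maxObjsPerSpan = pageSize / 8              // 8 bytes is the smallest size class
	pagesPerArena  = heapArenaBytes / pageSize // metadata entries per arena
)

func main() {
	fmt.Println(maxObjsPerSpan, pagesPerArena) // 1024 8192
}
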
mbitmap.go#L1887: pages := divRoundUp(bitmapBytes, pageSize)
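
divRoundUp here rounds the heap bitmap's byte size up to whole pages. A sketch of the usual ceiling-division helper (the runtime defines an equivalent in stubs.go):

// divRoundUp returns ceil(n / a). The caller must ensure a > 0 and
// that n+a-1 does not overflow.
func divRoundUp(n, a uintptr) uintptr {
	return (n + a - 1) / a
}
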
mcache.go#L191: gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
mcache.go#L218: atomic.Xadduintptr(&stats.largeAlloc, npages*pageSize)
mcache.go#L223: gcController.update(int64(s.npages*pageSize), 0)
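
All three mcache.go call sites convert a span's page count into bytes before feeding the GC pacer or allocation stats. A toy version of that accounting, with an assumed 8 KiB page and a hypothetical helper name:

const pageSize = 8192 // assumed runtime page size

// spanAccounting mirrors the first gcController.update argument
// above: the heap grows by the span's full footprint
// (npages*pageSize), minus what the mutator had already allocated.
func spanAccounting(npages, usedBytes uintptr) int64 {
	return int64(npages*pageSize) - int64(usedBytes)
}
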
mgcscavenge.go#L83: maxPagesPerPhysPage = maxPhysPageSize / pageSize
mgcscavenge.go#L621: maxPages := max / pageSize
mgcscavenge.go#L622: if max%pageSize != 0 {
mgcscavenge.go#L631: minPages := physPageSize / pageSize
mgcscavenge.go#L698: return uintptr(npages) * pageSize, work
mgcscavenge.go#L728: return uintptr(npages) * pageSize, work
mgcscavenge.go#L753: addr := chunkBase(ci) + uintptr(base)*pageSize
mgcscavenge.go#L775: sysUnused(unsafe.Pointer(addr), uintptr(npages)*pageSize)
mgcscavenge.go#L779: nbytes := int64(npages) * pageSize
mgcscavenge.go#L981: if physHugePageSize > pageSize && physHugePageSize > physPageSize {
mgcscavenge.go#L988: pagesPerHugePage := uintptr(physHugePageSize / pageSize)
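
The scavenger lines convert between byte budgets, runtime pages, OS physical pages, and transparent huge pages. A sketch of those conversions under assumed sizes (8 KiB runtime pages, 4 KiB OS pages, 2 MiB huge pages); in reality physPageSize is whatever the OS reports:

const (
	pageSize         = 8192    // assumed runtime page size
	physPageSize     = 4096    // assumed OS page size
	physHugePageSize = 2 << 20 // assumed 2 MiB transparent huge page
)

// maxPagesFor mirrors the max/pageSize round-up above: a byte
// budget becomes a whole number of runtime pages.
func maxPagesFor(max uintptr) uintptr {
	maxPages := max / pageSize
	if max%pageSize != 0 {
		maxPages++
	}
	return maxPages
}

// minPagesPerRelease mirrors minPages: memory is returned to the
// OS in whole physical pages, never fewer runtime pages than cover
// one (at least one, when OS pages are smaller than runtime pages).
func minPagesPerRelease() uintptr {
	if n := physPageSize / pageSize; n >= 1 {
		return n
	}
	return 1
}

// pagesPerHugePage mirrors the huge-page-aware release path:
// 2 MiB / 8 KiB = 256 runtime pages per huge page.
const pagesPerHugePage = physHugePageSize / pageSize
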
mgcwork.go#L26: if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
mgcwork.go#L375: s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
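
mgcwork.go allocates GC work buffers manually, workbufAlloc/pageSize pages at a time, and the check above guarantees that block divides evenly into both pages and workbufs. A sketch of the invariant, with sizes I believe the runtime uses but hedge as assumptions:

const (
	pageSize     = 8192     // assumed runtime page size
	_WorkbufSize = 2048     // assumed workbuf size in bytes
	workbufAlloc = 32 << 10 // assumed 32 KiB manual-allocation unit
)

func init() {
	// One allocation of workbufAlloc/pageSize pages (4 pages here)
	// must carve cleanly into workbufs (16 of them here).
	if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
		panic("workbufAlloc must be a multiple of pageSize and _WorkbufSize")
	}
}
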
mheap.go#L653: return ha.spans[(p/pageSize)%pagesPerArena]
mheap.go#L664: return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
mheap.go#L691: pageIdx = ((p / pageSize) / 8) % uintptr(len(arena.pageInUse))
mheap.go#L692: pageMask = byte(1 << ((p / pageSize) % 8))
mheap.go#L871: traceGCSweepSpan((n0 - nFreed) * pageSize)
mheap.go#L944: p := base / pageSize
mheap.go#L950: ai = arenaIndex(base + n*pageSize)
mheap.go#L992: arenaLimit := arenaBase + npage*pageSize
mheap.go#L1015: npage -= (arenaLimit - arenaBase) / pageSize
mheap.go#L1128: needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize
mheap.go#L1162: npages += physPageSize / pageSize
mheap.go#L1190: npages -= physPageSize / pageSize
mheap.go#L1195: h.pages.free(allocBase, spaceBefore/pageSize, false)
mheap.go#L1197: spaceAfter := (allocPages-npages)*pageSize - spaceBefore
mheap.go#L1199: h.pages.free(base+npages*pageSize, spaceAfter/pageSize, false)
mheap.go#L1231: nbytes := npages * pageSize
mheap.go#L1235: s.limit = s.base() + s.npages*pageSize
mheap.go#L1347: ask := alignUp(npage, pallocChunkPages) * pageSize
mheap.go#L1488: nbytes := s.npages * pageSize
mheap.go#L1690: arenaPage := (s.base() / pageSize) % pagesPerArena
mheap.go#L1698: arenaPage := (s.base() / pageSize) % pagesPerArena
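
The mheap.go lines index per-arena metadata by page number: the spans slice by (p/pageSize)%pagesPerArena, and the pageInUse bitmap by that page number split into a byte index and a bit mask. A sketch of the index math, again assuming 8 KiB pages and 64 MiB arenas:

const (
	pageSize      = 8192 // assumed runtime page size
	pagesPerArena = 8192 // assumed: 64 MiB arena / 8 KiB page
)

// spanIndex mirrors the spans lookups above: the arena's spans
// slice has one entry per page.
func spanIndex(p uintptr) uintptr {
	return (p / pageSize) % pagesPerArena
}

// pageBit mirrors the pageIdx/pageMask pair above: pageInUse packs
// eight pages per byte, so a page number splits into a byte index
// and a one-bit mask.
func pageBit(p uintptr, bitmapLen uintptr) (pageIdx uintptr, pageMask byte) {
	pageIdx = ((p / pageSize) / 8) % bitmapLen
	pageMask = byte(1 << ((p / pageSize) % 8))
	return
}
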
mpagealloc.go#L59: pallocChunkBytes = pallocChunkPages * pageSize
mpagealloc.go#L113: return uint(p % pallocChunkBytes / pageSize)
mpagealloc.go#L423: p.update(base, size/pageSize, true, false)
mpagealloc.go#L438: limit := base + npages*pageSize - 1
mpagealloc.go#L525: limit := base + npages*pageSize - 1
mpagealloc.go#L550: return uintptr(scav) * pageSize
mpagealloc.go#L720: foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)
mpagealloc.go#L758: addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
mpagealloc.go#L799: addr := chunkBase(ci) + uintptr(j)*pageSize
mpagealloc.go#L803: searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
mpagealloc.go#L842: addr = chunkBase(i) + uintptr(j)*pageSize
mpagealloc.go#L843: searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
mpagealloc.go#L888: limit := base + npages*pageSize - 1
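
The page allocator works in fixed-size chunks (512 pages, i.e. pallocChunkBytes = 4 MiB under these assumptions), and the lines above translate back and forth between addresses, chunk bases, and page indices within a chunk. A sketch of the two directions:

const (
	pageSize         = 8192                        // assumed runtime page size
	pallocChunkPages = 512                         // assumed pages per palloc chunk
	pallocChunkBytes = pallocChunkPages * pageSize // 4 MiB per chunk
)

// chunkPageIndex mirrors mpagealloc.go: the page within its chunk
// that address p falls in.
func chunkPageIndex(p uintptr) uint {
	return uint(p % pallocChunkBytes / pageSize)
}

// pageAddr inverts it, mirroring the chunkBase(ci)+j*pageSize
// pattern; chunkBase here is a stand-in for the runtime helper.
func pageAddr(chunkBase uintptr, j uint) uintptr {
	return chunkBase + uintptr(j)*pageSize
}
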
mpagecache.go#L47: return c.base + i*pageSize, uintptr(scav) * pageSize
mpagecache.go#L67: return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
mpagecache.go#L135: base: chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
mpagecache.go#L152: base: alignDown(addr, 64*pageSize),
mpagecache.go#L175: p.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
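
mpagecache.go gives each P a 64-page window (one uint64 of free bits) whose base is aligned down to 64*pageSize; an allocation turns a bit index i back into an address as base + i*pageSize. A simplified stand-in for that structure, assuming 8 KiB pages and 1-means-free bitmap semantics:

package main

import "math/bits"

const (
	pageSize       = 8192 // assumed runtime page size
	pageCachePages = 64   // one bitmap word's worth of pages
)

// pageCache is a simplified stand-in for the runtime type: a base
// aligned to 64*pageSize plus a bitmap with one bit per free page.
type pageCache struct {
	base  uintptr
	cache uint64
}

// alloc takes the lowest free page, mirroring the base+i*pageSize
// lines above.
func (c *pageCache) alloc() (uintptr, bool) {
	if c.cache == 0 {
		return 0, false
	}
	i := uintptr(bits.TrailingZeros64(c.cache))
	c.cache &^= 1 << i
	return c.base + i*pageSize, true
}

func main() {
	c := pageCache{base: 64 * pageSize, cache: 0b101}
	addr, _ := c.alloc() // lowest free bit is page 0
	_ = addr             // addr == c.base
}
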